{
unsigned long *p, page;
- page = __pa(per_cpu(cur_pgd, smp_processor_id()));
+ preempt_disable();
+ page = __pa(per_cpu(cur_pgd, smp_processor_id()));
+ preempt_enable();
+
p = (unsigned long *)__va(page);
p += (address >> 30) * 2;
printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
{
unsigned long page;
+ preempt_disable();
page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
[address >> 22];
+ preempt_enable();
+
printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
machine_to_phys(page));
/*
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
+ preempt_disable();
pgd = index + per_cpu(cur_pgd, smp_processor_id());
+ preempt_enable();
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
pmd_t *pmd;
pte_t *pte;
+ preempt_disable();
pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
+ preempt_enable();
pgd += pgd_index(address);
printk("PGD %lx ", pgd_val(*pgd));
/* On Xen the line below does not always work. Needs investigating! */
/*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
+ preempt_disable();
pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
+ preempt_enable();
pgd += pgd_index(address);
pgd_ref = pgd_offset_k(address);